From 82dc4c9fd8b278972528b70fdfdf3d316e413642 Mon Sep 17 00:00:00 2001
From: Keir Fraser <keir.fraser@citrix.com>
Date: Mon, 15 Feb 2010 17:54:04 +0000
Subject: [PATCH] When tmem is enabled, reserve a fraction of memory for
 allocations of 0<order<9

When tmem is enabled and absorbing nearly all free memory, reserve a
fraction of memory for mid-size (0<order<9) allocations so that they do
not fail due to fragmentation.  Larger and order-0 allocations may still
fail when available memory drops below the reserved amount.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
---
 xen/common/page_alloc.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 735c207e80..54920ff4db 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -224,6 +224,10 @@ static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
 static unsigned long *avail[MAX_NUMNODES];
 static long total_avail_pages;
 
+/* TMEM: Reserve a fraction of memory for mid-size (0<order<9) allocations.*/
+static long midsize_alloc_zone_pages;
+#define MIDSIZE_ALLOC_FRAC 128
+
 static DEFINE_SPINLOCK(heap_lock);
 
 static unsigned long init_node_heap(int node, unsigned long mfn,
@@ -300,6 +304,14 @@ static struct page_info *alloc_heap_pages(
     if ( node >= MAX_NUMNODES )
         node = cpu_to_node(smp_processor_id());
 
+    /*
+     * TMEM: When available memory is scarce, allow only mid-size allocations
+     * to avoid worst of fragmentation issues.
+     */
+    if ( opt_tmem && ((order == 0) || (order >= 9)) &&
+         (total_avail_pages <= midsize_alloc_zone_pages) )
+        goto fail;
+
     /*
      * Start with requested node, but exhaust all node memory in requested
      * zone before failing, only calc new node value if we fail to find memory
@@ -336,6 +348,7 @@ static struct page_info *alloc_heap_pages(
         return pg;
     }
 
+ fail:
     /* No suitable memory blocks. Fail the request. */
     spin_unlock(&heap_lock);
     return NULL;
@@ -504,6 +517,10 @@ static void free_heap_pages(
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;
 
+    if ( opt_tmem )
+        midsize_alloc_zone_pages = max(
+            midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
+
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
@@ -842,7 +859,7 @@ static unsigned long avail_heap_pages(
 
 unsigned long total_free_pages(void)
 {
-    return total_avail_pages;
+    return total_avail_pages - midsize_alloc_zone_pages;
 }
 
 void __init end_boot_allocator(void)
-- 
2.30.2